[XEN] Stricter TLB-flush discipline when unshadowing pagetables
author Tim Deegan <Tim.Deegan@xensource.com>
Fri, 20 Oct 2006 15:06:53 +0000 (16:06 +0100)
committer Tim Deegan <Tim.Deegan@xensource.com>
Fri, 20 Oct 2006 15:06:53 +0000 (16:06 +0100)
It's OK for the guest to see old entries in the TLB, but not for the
shadow fault handler to see them in its linear mappings.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
xen/arch/x86/mm/shadow/common.c
xen/arch/x86/mm/shadow/multi.c
xen/include/asm-x86/perfc_defn.h

index 0d1c76881d477f182cf531aca2cd019a1e7e2ec2..c053e64ffb28b1d83ef9221ab7df6d98421be359 100644 (file)
@@ -567,13 +567,18 @@ void shadow_prealloc(struct domain *d, unsigned int order)
 {
     /* Need a vpcu for calling unpins; for now, since we don't have
      * per-vcpu shadows, any will do */
-    struct vcpu *v = d->vcpu[0];
+    struct vcpu *v, *v2;
     struct list_head *l, *t;
     struct page_info *pg;
+    cpumask_t flushmask = CPU_MASK_NONE;
     mfn_t smfn;
 
     if ( chunk_is_available(d, order) ) return; 
     
+    v = current;
+    if ( v->domain != d )
+        v = d->vcpu[0];
+
     /* Stage one: walk the list of top-level pages, unpinning them */
     perfc_incrc(shadow_prealloc_1);
     list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
@@ -592,28 +597,30 @@ void shadow_prealloc(struct domain *d, unsigned int order)
      * loaded in cr3 on some vcpu.  Walk them, unhooking the non-Xen
      * mappings. */
     perfc_incrc(shadow_prealloc_2);
-    v = current;
-    if ( v->domain != d )
-        v = d->vcpu[0];
-    /* Walk the list from the tail: recently used toplevels have been pulled
-     * to the head */
     list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
     {
         pg = list_entry(l, struct page_info, list);
         smfn = page_to_mfn(pg);
         shadow_unhook_mappings(v, smfn);
 
-        /* Need to flush TLB if we've altered our own tables */
-        if ( !shadow_mode_external(d) &&
-             (pagetable_get_pfn(current->arch.shadow_table[0]) == mfn_x(smfn)
-              || pagetable_get_pfn(current->arch.shadow_table[1]) == mfn_x(smfn)
-              || pagetable_get_pfn(current->arch.shadow_table[2]) == mfn_x(smfn)
-              || pagetable_get_pfn(current->arch.shadow_table[3]) == mfn_x(smfn)
-                 ) )
-            local_flush_tlb();
-        
+        /* Remember to flush TLBs: we have removed shadow entries that 
+         * were in use by some vcpu(s). */
+        for_each_vcpu(d, v2) 
+        {
+            if ( pagetable_get_pfn(v2->arch.shadow_table[0]) == mfn_x(smfn)
+                 || pagetable_get_pfn(v2->arch.shadow_table[1]) == mfn_x(smfn)
+                 || pagetable_get_pfn(v2->arch.shadow_table[2]) == mfn_x(smfn) 
+                 || pagetable_get_pfn(v2->arch.shadow_table[3]) == mfn_x(smfn)
+                )
+                cpus_or(flushmask, v2->vcpu_dirty_cpumask, flushmask);
+        }
+
         /* See if that freed up a chunk of appropriate size */
-        if ( chunk_is_available(d, order) ) return;
+        if ( chunk_is_available(d, order) ) 
+        {
+            flush_tlb_mask(flushmask);
+            return;
+        }
     }
     
     /* Nothing more we can do: all remaining shadows are of pages that
@@ -2216,6 +2223,10 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
         if ( all ) 
             domain_crash(v->domain);
     }
+
+    /* Need to flush TLBs now, so that linear maps are safe next time we 
+     * take a fault. */
+    flush_tlb_mask(v->domain->domain_dirty_cpumask);
 }
 
 void
index f6b89a6ec3fff074877d7053ef93ccab5e5a0c09..b70c979fbd0d9f9eaaf4a6eb4b773b34d3bdaf42 100644 (file)
@@ -2562,41 +2562,11 @@ static inline void check_for_early_unshadow(struct vcpu *v, mfn_t gmfn)
          sh_mfn_is_a_page_table(gmfn) )
     {
         u32 flags = mfn_to_page(gmfn)->shadow_flags;
-        mfn_t smfn;
         if ( !(flags & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64)) )
         {
             perfc_incrc(shadow_early_unshadow);
             sh_remove_shadows(v, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
-            return;
-        }
-        /* SHF_unhooked_mappings is set to make sure we only unhook
-         * once in a single batch of updates. It is reset when this
-         * top-level page is loaded into CR3 again */
-        if ( !(flags & SHF_unhooked_mappings) ) 
-        {
-            perfc_incrc(shadow_early_unshadow_top);
-            mfn_to_page(gmfn)->shadow_flags |= SHF_unhooked_mappings;
-            if ( flags & SHF_L2_32 )
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l2_32_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-            if ( flags & SHF_L2_PAE ) 
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l2_pae_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-            if ( flags & SHF_L2H_PAE ) 
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l2h_pae_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-            if ( flags & SHF_L4_64 ) 
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l4_64_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-        }
+        } 
     }
     v->arch.shadow.last_emulated_mfn = mfn_x(gmfn);
 #endif
index 4baea4d3fd31fd72091fc059deb9302db4b6c2d5..7204348fcaed98e9fef15f032a36551d2722118d 100644 (file)
@@ -76,7 +76,6 @@ PERFCOUNTER_CPU(shadow_writeable_bf,   "shadow writeable brute-force")
 PERFCOUNTER_CPU(shadow_mappings,       "shadow removes all mappings")
 PERFCOUNTER_CPU(shadow_mappings_bf,    "shadow rm-mappings brute-force")
 PERFCOUNTER_CPU(shadow_early_unshadow, "shadow unshadows for fork/exit")
-PERFCOUNTER_CPU(shadow_early_unshadow_top, "shadow unhooks for fork/exit")
 PERFCOUNTER_CPU(shadow_unshadow,       "shadow unshadows a page")
 PERFCOUNTER_CPU(shadow_up_pointer,     "shadow unshadow by up-pointer")
 PERFCOUNTER_CPU(shadow_unshadow_bf,    "shadow unshadow brute-force")